import os
import numpy as np
import pandas as pd
from glob import glob
import shutil
# image
import cv2
from skimage.io import imread
# TensorFlow
import tensorflow as tf
from tensorflow.keras import layers, models
# Visualisation libraries
## Text
from colorama import Fore, Back, Style
from IPython.display import Image, display, Markdown, Latex, clear_output
## progressbar
import progressbar
## plotly
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
import plotly.offline as py
from plotly.subplots import make_subplots
import plotly.express as px
## seaborn
import seaborn as sns
## matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
from matplotlib.font_manager import FontProperties
import matplotlib.colors as mcolors
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib import cm
# Global matplotlib configuration applied to every figure in this notebook.
plt.style.use('seaborn-whitegrid')
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['text.color'] = 'k'
# IPython magic: render matplotlib figures inline in the notebook output.
%matplotlib inline
import warnings
# Silence library warnings to keep the notebook output readable.
warnings.filterwarnings("ignore")

In this dataset, you are provided with a large number of small pathology images to classify. Files are named with an image id. The train_labels.csv file provides the ground truth for the images in the train folder. You are predicting the labels for the images in the test folder. A positive label indicates that the center 32x32px region of a patch contains at least one pixel of tumor tissue. Tumor tissue in the outer region of the patch does not influence the label. This outer region is provided to enable fully-convolutional models that do not use zero-padding, to ensure consistent behavior when applied to a whole-slide image.
The original PCam dataset contains duplicate images due to its probabilistic sampling; however, the version presented on Kaggle does not contain duplicates. We have otherwise maintained the same data and splits as the PCam benchmark.
# Root folder of the Histopathologic Cancer Detection dataset.
Path = 'hcd'
# Map each image id (file name without extension) to its full path on disk.
path_dict = {os.path.splitext(os.path.basename(x))[0]: x for x in glob(os.path.join(Path, '*', '*.tif'))}
# Ground-truth labels for the training images.
Data = pd.read_csv(os.path.join(Path, "train_labels.csv"))
# Title-case the column names ('id' -> 'ID', 'label' -> 'Label').
Data.columns = [x.title().replace('Id', 'ID') for x in Data.columns]
Data['Path'] = Data['ID'].map(path_dict)
display(Data.head(10))
# FIX: Styler.hide_index() was deprecated in pandas 1.4 and removed in 2.0;
# Styler.hide(axis="index") is the supported replacement.
display(pd.DataFrame({'Number of Instances': [Data.shape[0]],
                      'Number of Attributes': [Data.shape[1]]}).style.hide(axis="index"))
| ID | Label | Path | |
|---|---|---|---|
| 0 | f38a6374c348f90b587e046aac6079959adf3835 | 0 | hcd\train\f38a6374c348f90b587e046aac6079959adf... |
| 1 | c18f2d887b7ae4f6742ee445113fa1aef383ed77 | 1 | hcd\train\c18f2d887b7ae4f6742ee445113fa1aef383... |
| 2 | 755db6279dae599ebb4d39a9123cce439965282d | 0 | hcd\train\755db6279dae599ebb4d39a9123cce439965... |
| 3 | bc3f0c64fb968ff4a8bd33af6971ecae77c75e08 | 0 | hcd\train\bc3f0c64fb968ff4a8bd33af6971ecae77c7... |
| 4 | 068aba587a4950175d04c680d38943fd488d6a9d | 0 | hcd\train\068aba587a4950175d04c680d38943fd488d... |
| 5 | acfe80838488fae3c89bd21ade75be5c34e66be7 | 0 | hcd\train\acfe80838488fae3c89bd21ade75be5c34e6... |
| 6 | a24ce148f6ffa7ef8eefb4efb12ebffe8dd700da | 1 | hcd\train\a24ce148f6ffa7ef8eefb4efb12ebffe8dd7... |
| 7 | 7f6ccae485af121e0b6ee733022e226ee6b0c65f | 1 | hcd\train\7f6ccae485af121e0b6ee733022e226ee6b0... |
| 8 | 559e55a64c9ba828f700e948f6886f4cea919261 | 0 | hcd\train\559e55a64c9ba828f700e948f6886f4cea91... |
| 9 | 8eaaa7a400aa79d36c2440a4aa101cc14256cda4 | 0 | hcd\train\8eaaa7a400aa79d36c2440a4aa101cc14256... |
| Number of Instances | Number of Attributes |
|---|---|
| 220025 | 3 |
def DatasetTargetDist(Inp, Target, PD, Labels_dict = None):
    """Visualise the class distribution of `Target` in `Inp` as a side-by-side
    count table (left) and pie chart (right).

    Parameters
    ----------
    Inp : pandas.DataFrame
        Dataset containing the target column.
    Target : str
        Name of the label column to summarise.
    PD : dict
        Plot settings (colors, pull, hole, sizes, legend and title options).
    Labels_dict : dict or None, optional
        Optional mapping from raw label values to display names.
    """
    # Count table: one row per class, with absolute count and percentage share.
    Table = Inp[Target].value_counts().to_frame('Count').reset_index(drop = False).rename(columns = {'index':Target})
    # FIX: `not Labels_dict == None` replaced with the correct identity test.
    if Labels_dict is not None:
        Table[Target] = Table[Target].replace(Labels_dict)
    Table['Percentage'] = np.round(100*(Table['Count']/Table['Count'].sum()), 2)
    fig = make_subplots(rows=1, cols=2, horizontal_spacing = 0.02, column_widths=PD['column_widths'],
                        specs=[[{"type": "table"}, {"type": "pie"}]])
    # Right: pie chart of the class counts.
    fig.add_trace(go.Pie(labels=Table[Target].values, values=Table['Count'].values,
                         pull=PD['pull'], textfont=dict(size=PD['textfont']),
                         marker=dict(colors=PD['PieColors'], line=dict(color='black', width=1))), row=1, col=2)
    fig.update_traces(hole=PD['hole'])
    fig.update_layout(height = PD['height'], legend=dict(orientation=PD['legend_orientation']),
                      legend_title_text = PD['legend_title'])
    # Left: the same numbers as a table; percentages rendered as '%xx.xx'.
    T = Table.copy()
    T['Percentage'] = T['Percentage'].map(lambda x: '%%%.2f' % x)
    Temp = []
    for i in T.columns:
        Temp.append(T.loc[:, i].values)
    fig.add_trace(go.Table(header=dict(values = list(Table.columns), line_color='darkslategray',
                                       fill_color= PD['TableColors'][0], align=['center','center'],
                                       font=dict(color='white', size=12), height=25),
                           columnwidth = PD['tablecolumnwidth'],
                           cells=dict(values=Temp, line_color='darkslategray',
                                      fill=dict(color= [PD['TableColors'][1], PD['TableColors'][1]]),
                                      align=['center','center', 'center'], font_size=12, height=20)), 1, 1)
    # FIX: `not PD['Title'] == None` replaced with the correct identity test.
    if PD['Title'] is not None:
        fig.update_layout(title={'text': '<b>' + PD['Title'] + '<b>', 'x':PD['title_x'],
                                 'y':PD['title_y'], 'xanchor': 'center', 'yanchor': 'top'})
    fig.show()
Feat = 'Label'
Name = 'Labels'
# One "pull" value per pie slice: 0 for every class except the last one, which
# is pulled out of the pie by 0.05.
# BUG FIX: the original sized this list with len(Data[Feat]) — the number of
# ROWS (~220k) — instead of the number of distinct classes, so the 0.05 entry
# landed far beyond the last slice and no slice was ever pulled.
Pull = [0] * (Data[Feat].nunique() - 1)
Pull.append(.05)
PD = dict(PieColors = ['DarkGreen','FireBrick'], TableColors = ['DarkSlateGray','AliceBlue'], hole = .4,
          column_widths=[0.5, 0.5], textfont = 14, height = 400, tablecolumnwidth = [.1, .05, .08],
          pull = Pull, legend_title = Name, legend_orientation = 'v', Title = None, title_x = 0.5, title_y = 0.85)
del Pull
DatasetTargetDist(Data, Target = 'Label', PD = PD)
We take a sample of the dataset in which each label has the same number of instances.
# Draw up to n instances from every class so the sampled dataset is balanced.
n = 5000
Sample = pd.concat([Data.loc[Data['Label'] == c].head(n) for c in Data['Label'].unique()])
Note that the sampled dataset is now balanced:
# Re-plot the label distribution, this time for the balanced sample.
DatasetTargetDist(Sample, Target = 'Label', PD = PD)
We can convert the TIFF images to JPEG files and copy them into a new directory.
# Build the file-info table for the sample: file name with its .tif extension,
# label as a string (used below as the destination sub-folder name), and the
# source path carried over from Sample.
FilesInfo = Sample.copy()
FilesInfo['ID'] = FilesInfo['ID'] + '.tif'
FilesInfo['Label'] = FilesInfo['Label'].astype(str)
dst = os.path.join(Path, 'train_mod')
def ImgSep(Inp, Target, file_name_col, src_path_col, dst, Convert_to_jpg = False):
    """Copy (or convert to JPEG) the images listed in `Inp` into `dst`, one
    sub-folder per class label.

    Parameters
    ----------
    Inp : pandas.DataFrame
        One row per image; must contain the `Target`, `file_name_col` and
        `src_path_col` columns.
    Target : str
        Column holding the class label (also the destination sub-folder name).
    file_name_col : str
        Column holding the image file name (with extension).
    src_path_col : str
        Column holding the full path of the source image.
    dst : str
        Destination root directory; it is wiped and recreated.
    Convert_to_jpg : bool, optional
        If True, re-encode each source image as a quality-100 JPEG;
        otherwise copy the file unchanged.
    """
    # Start from a clean destination tree.
    if os.path.exists(dst):
        shutil.rmtree(dst)
    for subfolder in Inp[Target].unique().tolist():
        if not os.path.exists(os.path.join(dst, subfolder)):
            os.makedirs(os.path.join(dst, subfolder))
    Counter = 0
    # BUG FIX: the progress bar was sized with the global Data.shape[0] instead
    # of the actual input Inp.shape[0], so it mis-reported progress on samples.
    Progress_Bar = progressbar.ProgressBar(maxval=Inp.shape[0],
                                           widgets=[progressbar.Bar('#', '|', '|'), progressbar.Percentage()])
    Progress_Bar.start()
    for _, row in Inp.iterrows():
        if Convert_to_jpg:
            # Re-encode the source image as a JPEG inside the label sub-folder.
            cv2.imwrite(os.path.join(dst, row[Target], row[file_name_col].split('.')[0] + '.jpg'),
                        imread(row[src_path_col]), [int(cv2.IMWRITE_JPEG_QUALITY), 100])
        else:
            # BUG FIX: the original referenced an undefined name `src` and had
            # the shutil.copy arguments reversed; copy from the source path
            # into the label sub-folder under dst.
            shutil.copy(row[src_path_col], os.path.join(dst, row[Target], row[file_name_col]))
        Progress_Bar.update(Counter)
        Counter += 1
    Progress_Bar.finish()
# Convert the sampled TIFFs to JPEGs under <Path>/train_mod/<label>/.
ImgSep(Inp = FilesInfo, Target = 'Label', file_name_col = 'ID',
src_path_col = 'Path', dst = dst, Convert_to_jpg = True)
# Point Path at the converted directory for the rest of the notebook.
Path = dst
del dst
|#########################################################################|100%
Now, we can inspect the directory tree of the converted dataset.
def Path_Tree(PATH, Extension):
    """Pretty-print the one-level directory tree under `PATH` with colorama
    highlighting, listing files matching `Extension` in each sub-folder.

    Parameters
    ----------
    PATH : str
        Root directory to inspect.
    Extension : str
        File extension filter, e.g. '.jpg'.

    Returns
    -------
    dict
        Mapping of sub-folder name to the list of matching file names.
    """
    Out = {}
    sep = ' ' * 3
    BACK = {'Black': Back.BLACK, 'Red': Back.RED, 'Green': Back.GREEN, 'Yellow': Back.YELLOW,
            'Blue': Back.BLUE, 'Magenta': Back.MAGENTA, 'Cyan': Back.CYAN}
    # BUG FIX: PATH.split('\\') only works with Windows-style separators;
    # os.path.basename is portable across platforms.
    title = os.path.basename(os.path.normpath(PATH))
    print(Style.RESET_ALL + Fore.BLUE + Style.NORMAL + '=' * (len(title) + 1) + Style.RESET_ALL)
    print(Back.BLACK + Fore.CYAN + Style.NORMAL + title + ':' + Style.RESET_ALL)
    print(Style.RESET_ALL + Fore.BLUE + Style.NORMAL + '=' * (len(title) + 1) + Style.RESET_ALL)
    # Cycle through the background colors, one per sub-folder.
    C = ['Red', 'Green', 'Yellow', 'Blue', 'Magenta', 'Cyan']
    i = 0
    for entry in os.listdir(PATH):
        if os.path.isdir(os.path.join(PATH, entry)):
            print('└──', BACK[C[i % len(C)]] + Fore.BLACK + Style.NORMAL + entry + ':' + Style.RESET_ALL)
            Sub = os.path.join(PATH, entry)
            List = [x for x in os.listdir(Sub) if x.endswith(Extension)]
            Out[entry] = List
            # BUG FIX: guard empty sub-folders, which previously raised
            # IndexError on List[0].
            if List:
                print(2 * sep, Fore.BLUE + Style.NORMAL +
                      '%i %s files:' % (len(List), List[0].split('.')[-1].upper()) + Style.RESET_ALL)
                print(2 * sep, ', '.join(List[:5]) + ', ...')
            i += 1
    return Out
# Print the directory tree of the converted JPEG dataset.
_ = Path_Tree(Path, '.jpg')
========== train_mod: ========== └── 0: 5000 JPG files: 001ee9679b82132638bd63fb4872a5132f27b8b0.jpg, 00297676c9b3076e2eb4689a63477ef3399d598a.jpg, 005f4f493ec7f9f506f7435c173a4f31097f977c.jpg, 00630e2f3709f1d9e09aa2c9e49441da1f34b8f9.jpg, 007cdce083f4b56d64b0ea235dae21cc59a2f9aa.jpg, ... └── 1: 5000 JPG files: 000ef1b51b71267e85aca599d4def578d870c9e0.jpg, 0011eb0c482ab5752cc6e84561f50200d7c1db30.jpg, 001514e67fb8e4678373106c491fbf172378232c.jpg, 001522991c0eab8b47769f2a0f852bc0891c3af2.jpg, 0019251714cd79d269db386da1dedd9f4d185827.jpg, ...
# Batch size shared by the training and validation pipelines.
batch_size = 128
# Infer the input image size from one sample image on disk.
(Img_Height, Img_Width, _) = imread(Sample['Path'][0]).shape
# 80/20 train/validation split of the converted image folder; labels are
# inferred from the sub-folder names. The identical seed keeps both subsets
# drawn from the same shuffle, so they stay disjoint.
train_ds = tf.keras.preprocessing.image_dataset_from_directory(directory= Path, validation_split=0.2, subset="training",
seed=123, image_size=(Img_Height, Img_Width),
batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(directory= Path, validation_split=0.2, subset="validation",
seed=123, image_size=(Img_Height, Img_Width),
batch_size=batch_size)
Found 10000 files belonging to 2 classes. Using 8000 files for training. Found 10000 files belonging to 2 classes. Using 2000 files for validation.
# Display a 6x6 grid of training images with their class labels.
fig, ax = plt.subplots(6, 6 , figsize = (16, 17))
_ = fig.suptitle('A Sample of Dataset', fontweight='bold', fontsize = 18)
ax = ax.ravel()
class_names = train_ds.class_names
# Take a single batch and plot its first 36 images.
for images, labels in train_ds.take(1):
for i in range(len(ax)):
_ = ax[i].imshow(images[i].numpy().astype("uint8"), cmap='bone')
_ = ax[i].set_title('Label: %s' % class_names[labels[i]],
fontweight='bold', fontsize = 12)
_ = ax[i].axis("off")
_ = ax[i].set_aspect(1)
fig.tight_layout()
A multi-layer perceptron (MLP) is a class of feedforward artificial neural network (ANN). At each iteration, the algorithm uses the Cross-Entropy Loss to measure the loss, and then the gradient is computed and the model is updated. At the end of this iterative process, we reach a better level of agreement between the true and predicted labels, since the error is lower than that of the first step.
Here, we have a small dataset that might result in Overfitting. Thus, we can define a Data augmentation function that generates additional training data from the existing examples by augmenting them using random transformations that yield believable-looking images.
# Small CNN: rescale pixels to [0, 1], three Conv+MaxPool stages, then a
# dense classification head.
model = models.Sequential([layers.experimental.preprocessing.Rescaling(1./255, input_shape=(Img_Height, Img_Width, 3)),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
# Output layer: one logit per class, no softmax (the loss below is
# configured with from_logits=True).
layers.Dense(len(Sample['Label'].unique()))])
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True, show_layer_names=True, expand_nested = True)
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= rescaling (Rescaling) (None, 96, 96, 3) 0 _________________________________________________________________ conv2d (Conv2D) (None, 96, 96, 16) 448 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 48, 48, 16) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 48, 48, 32) 4640 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 24, 24, 32) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 24, 24, 64) 18496 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 12, 12, 64) 0 _________________________________________________________________ flatten (Flatten) (None, 9216) 0 _________________________________________________________________ dense (Dense) (None, 128) 1179776 _________________________________________________________________ dense_1 (Dense) (None, 2) 258 ================================================================= Total params: 1,203,618 Trainable params: 1,203,618 Non-trainable params: 0 _________________________________________________________________
Compiling and fitting the model
# Number of iterations
IT = 11
# from_logits=True because the model's final Dense layer emits raw logits.
model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
# Training the model
history = model.fit(train_ds, validation_data=val_ds, epochs=IT, verbose = 0)
def Search_List(Key, List):
    """Return every string in `List` that contains `Key` as a substring."""
    return [element for element in List if Key in element]
# Pretty display names for Keras history/metric keys.
Metrics_Names = {'loss':'Loss', 'accuracy':'Accuracy', 'mae':'MAE', 'mse':'MSE', 'recall': 'Recall'}

def Table_modify(df, Metrics_Names = Metrics_Names):
    """Rename metric columns to their display names, order the columns
    alphabetically, and prepend an 'Iteration' (epoch) counter column.
    The input frame is left unmodified; a new frame is returned."""
    renamed = df.rename(columns = Metrics_Names)
    renamed = renamed.reindex(columns = sorted(renamed.columns))
    renamed.insert(loc = 0, column = 'Iteration', value = np.arange(0, renamed.shape[0]), allow_duplicates = False)
    return renamed
# Split the history keys into validation metrics ('val_'-prefixed) and the rest.
Validation_Table = Search_List('val_',history.history.keys())
Train_Table = list(set( history.history.keys()) - set(Validation_Table))
# Build per-epoch DataFrames, one column per metric.
Validation_Table = pd.DataFrame(np.array([history.history[x] for x in Validation_Table]).T, columns = Validation_Table)
Train_Table = pd.DataFrame(np.array([history.history[x] for x in Train_Table]).T, columns = Train_Table)
# Drop the 'val_' prefix so both tables share the same column names.
Validation_Table.columns = [x.replace('val_','') for x in Validation_Table.columns]
Train_Table = Table_modify(Train_Table)
Validation_Table = Table_modify(Validation_Table)
# Train Set Score
score = model.evaluate(train_ds, batch_size = batch_size, verbose = 0)
score = pd.DataFrame(score, index = model.metrics_names).T
score.index = ['Train Set Score']
# Validation Set Score
Temp = model.evaluate(val_ds, batch_size = batch_size, verbose = 0)
Temp = pd.DataFrame(Temp, index = model.metrics_names).T
Temp.index = ['Validation Set Score']
# FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported replacement.
score = pd.concat([score, Temp])
score.rename(columns= Metrics_Names, inplace = True)
score = score.reindex(sorted(score.columns), axis=1)
# FIX: Styler.set_precision() was deprecated and removed in pandas 2.0;
# Styler.format(precision=...) replaces it.
display(score.style.format(precision=4))
| Accuracy | Loss | |
|---|---|---|
| Train Set Score | 0.8446 | 0.3650 |
| Validation Set Score | 0.7945 | 0.4398 |
def Plot_history(history, PD, Title = False, metrics_names = [x.title() for x in model.metrics_names]):
    """Plot per-epoch metric curves (left) beside a numeric history table (right).

    Parameters
    ----------
    history : pandas.DataFrame
        Output of Table_modify: an 'Iteration' column plus one column per metric.
    PD : dict
        Plot settings: 'yLim', 'Table_Rows', 'tablecolumnwidth', 'TableColors'.
    Title : str or False, optional
        Figure title; False suppresses it.
    metrics_names : list of str, optional
        Metric columns to plot. NOTE(review): this default is evaluated once at
        definition time from the global `model` — pass it explicitly if the
        model's metrics change afterwards.
    """
    fig = make_subplots(rows=1, cols=2, horizontal_spacing = 0.02, column_widths=[0.6, 0.4],
                        specs=[[{"type": "scatter"}, {"type": "table"}]])
    # Left: one line per metric over the iterations.
    Colors = ['OrangeRed', 'MidnightBlue', 'purple']
    for j in range(len(metrics_names)):
        fig.add_trace(go.Scatter(x= history['Iteration'].values, y= history[metrics_names[j]].values,
                                 line=dict(color=Colors[j], width= 1.5), name = metrics_names[j]), 1, 1)
    fig.update_layout(legend=dict(x=0, y=1.1, traceorder='reversed', font_size=12),
                      dragmode='select', plot_bgcolor= 'white', height=600, hovermode='closest',
                      legend_orientation='h')
    fig.update_xaxes(range=[history.Iteration.min(), history.Iteration.max()],
                     showgrid=True, gridwidth=1, gridcolor='Lightgray',
                     showline=True, linewidth=1, linecolor='Lightgray', mirror=True, row=1, col=1)
    fig.update_yaxes(range=[0, PD['yLim']], showgrid=True, gridwidth=1, gridcolor='Lightgray',
                     showline=True, linewidth=1, linecolor='Lightgray', mirror=True, row=1, col=1)
    # Right: subsample the history to at most Table_Rows rows (plus the final row).
    # FIX: `not PD['Table_Rows'] == None` replaced with the correct identity test.
    if PD['Table_Rows'] is not None:
        ind = np.linspace(0, history.shape[0], PD['Table_Rows'], endpoint = False).round(0).astype(int)
        ind = np.append(ind, history.index[-1])
        history = history[history.index.isin(ind)]
    T = history.copy()
    T[metrics_names] = T[metrics_names].applymap(lambda x: '%.4e' % x)
    Temp = []
    for i in T.columns:
        Temp.append(T.loc[:, i].values)
    TableColors = PD['TableColors']
    fig.add_trace(go.Table(header=dict(values = list(history.columns), line_color=TableColors[0],
                                       fill_color=TableColors[0], align=['center','center'],
                                       font=dict(color=TableColors[1], size=12), height=25),
                           columnwidth = PD['tablecolumnwidth'],
                           cells=dict(values=Temp, line_color=TableColors[0],
                                      fill=dict(color=[TableColors[1], TableColors[1]]),
                                      align=['center', 'center'], font_size=12, height=20)), 1, 2)
    if Title != False:
        fig.update_layout(plot_bgcolor= 'white',
                          title={'text': Title, 'x':0.46, 'y':0.94, 'xanchor': 'center', 'yanchor': 'top'},
                          yaxis_title='Frequency')
    fig.show()
# Shared plot settings for both history figures.
PD = dict(Table_Rows = 25, yLim = 1.4, tablecolumnwidth = [0.3, 0.4, 0.4], TableColors = ['Navy','White'])
Plot_history(Train_Table, Title = 'Train Set', PD = PD)
Plot_history(Validation_Table, Title = 'Validation Set', PD = PD)
Here, we only went through a few iterations; however, we need to train the model for more iterations to get more accurate results.
B. S. Veeling, J. Linmans, J. Winkens, T. Cohen, M. Welling. "Rotation Equivariant CNNs for Digital Pathology". arXiv:1806.03962
Ehteshami Bejnordi et al. Diagnostic Assessment of Deep Learning Algorithms for Detection of Lymph Node Metastases in Women With Breast Cancer. JAMA: The Journal of the American Medical Association, 318(22), 2199–2210. doi:10.1001/jama.2017.14585